Skip to content

Conversation

bigcat88
Copy link
Contributor

@bigcat88 bigcat88 commented Oct 1, 2025

Most nodes were tested after conversion:

Screenshot From 2025-10-01 09-47-07 · Screenshot From 2025-10-01 09-23-06

Git diff:

diff --git a/v3_object_info.json b/master_object_info.json
index b969950..7f7f7c6 100644
--- a/v3_object_info.json
+++ b/master_object_info.json
@@ -20072,8 +20072,7 @@
         "input": {
             "required": {
                 "clip": [
-                    "CLIP",
-                    {}
+                    "CLIP"
                 ],
                 "bert": [
                     "STRING",
@@ -20107,29 +20106,21 @@
         "output_name": [
             "CONDITIONING"
         ],
-        "output_tooltips": [
-            null
-        ],
         "name": "CLIPTextEncodeHunyuanDiT",
-        "display_name": null,
+        "display_name": "CLIPTextEncodeHunyuanDiT",
         "description": "",
         "python_module": "comfy_extras.nodes_hunyuan",
         "category": "advanced/conditioning",
-        "output_node": false,
-        "deprecated": false,
-        "experimental": false,
-        "api_node": false
+        "output_node": false
     },
     "TextEncodeHunyuanVideo_ImageToVideo": {
         "input": {
             "required": {
                 "clip": [
-                    "CLIP",
-                    {}
+                    "CLIP"
                 ],
                 "clip_vision_output": [
-                    "CLIP_VISION_OUTPUT",
-                    {}
+                    "CLIP_VISION_OUTPUT"
                 ],
                 "prompt": [
                     "STRING",
@@ -20141,10 +20132,10 @@
                 "image_interleave": [
                     "INT",
                     {
-                        "tooltip": "How much the image influences things vs the text prompt. Higher number means more influence from the text prompt.",
                         "default": 2,
                         "min": 1,
-                        "max": 512
+                        "max": 512,
+                        "tooltip": "How much the image influences things vs the text prompt. Higher number means more influence from the text prompt."
                     }
                 ]
             }
@@ -20166,18 +20157,12 @@
         "output_name": [
             "CONDITIONING"
         ],
-        "output_tooltips": [
-            null
-        ],
         "name": "TextEncodeHunyuanVideo_ImageToVideo",
-        "display_name": null,
+        "display_name": "TextEncodeHunyuanVideo_ImageToVideo",
         "description": "",
         "python_module": "comfy_extras.nodes_hunyuan",
         "category": "advanced/conditioning",
-        "output_node": false,
-        "deprecated": false,
-        "experimental": false,
-        "api_node": false
+        "output_node": false
     },
     "EmptyHunyuanLatentVideo": {
         "input": {
@@ -20236,29 +20221,21 @@
         "output_name": [
             "LATENT"
         ],
-        "output_tooltips": [
-            null
-        ],
         "name": "EmptyHunyuanLatentVideo",
-        "display_name": null,
+        "display_name": "EmptyHunyuanLatentVideo",
         "description": "",
         "python_module": "comfy_extras.nodes_hunyuan",
         "category": "latent/video",
-        "output_node": false,
-        "deprecated": false,
-        "experimental": false,
-        "api_node": false
+        "output_node": false
     },
     "HunyuanImageToVideo": {
         "input": {
             "required": {
                 "positive": [
-                    "CONDITIONING",
-                    {}
+                    "CONDITIONING"
                 ],
                 "vae": [
-                    "VAE",
-                    {}
+                    "VAE"
                 ],
                 "width": [
                     "INT",
@@ -20296,21 +20273,16 @@
                     }
                 ],
                 "guidance_type": [
-                    "COMBO",
-                    {
-                        "multiselect": false,
-                        "options": [
-                            "v1 (concat)",
-                            "v2 (replace)",
-                            "custom"
-                        ]
-                    }
+                    [
+                        "v1 (concat)",
+                        "v2 (replace)",
+                        "custom"
+                    ]
                 ]
             },
             "optional": {
                 "start_image": [
-                    "IMAGE",
-                    {}
+                    "IMAGE"
                 ]
             }
         },
@@ -20340,19 +20312,12 @@
             "positive",
             "latent"
         ],
-        "output_tooltips": [
-            null,
-            null
-        ],
         "name": "HunyuanImageToVideo",
-        "display_name": null,
+        "display_name": "HunyuanImageToVideo",
         "description": "",
         "python_module": "comfy_extras.nodes_hunyuan",
         "category": "conditioning/video_models",
-        "output_node": false,
-        "deprecated": false,
-        "experimental": false,
-        "api_node": false
+        "output_node": false
     },
     "EmptyHunyuanImageLatent": {
         "input": {
@@ -20401,33 +20366,24 @@
         "output_name": [
             "LATENT"
         ],
-        "output_tooltips": [
-            null
-        ],
         "name": "EmptyHunyuanImageLatent",
-        "display_name": null,
+        "display_name": "EmptyHunyuanImageLatent",
         "description": "",
         "python_module": "comfy_extras.nodes_hunyuan",
         "category": "latent",
-        "output_node": false,
-        "deprecated": false,
-        "experimental": false,
-        "api_node": false
+        "output_node": false
     },
     "HunyuanRefinerLatent": {
         "input": {
             "required": {
                 "positive": [
-                    "CONDITIONING",
-                    {}
+                    "CONDITIONING"
                 ],
                 "negative": [
-                    "CONDITIONING",
-                    {}
+                    "CONDITIONING"
                 ],
                 "latent": [
-                    "LATENT",
-                    {}
+                    "LATENT"
                 ],
                 "noise_augmentation": [
                     "FLOAT",
@@ -20463,20 +20419,12 @@
             "negative",
             "latent"
         ],
-        "output_tooltips": [
-            null,
-            null,
-            null
-        ],
         "name": "HunyuanRefinerLatent",
-        "display_name": null,
+        "display_name": "HunyuanRefinerLatent",
         "description": "",
         "python_module": "comfy_extras.nodes_hunyuan",
         "category": "sd",
-        "output_node": false,
-        "deprecated": false,
-        "experimental": false,
-        "api_node": false
+        "output_node": false
     },
     "CLIPTextEncodeFlux": {
         "input": {

@bigcat88 bigcat88 requested a review from Kosinkadink as a code owner October 1, 2025 06:47
@bigcat88
Copy link
Contributor Author

bigcat88 commented Oct 1, 2025

+label: Core

@Kosinkadink
Copy link
Collaborator

Diff looks good, will merge on Friday Oct. 3rd to give node author some time.

@Kosinkadink Kosinkadink added the Scheduled Merge PR is reviewed and ready, but will be merged at a specific time. label Oct 1, 2025
@Kosinkadink
Copy link
Collaborator

Holding off a bit as this PR will have conflicts with a PR that adds nodes in v1 format: https://github.com/comfyanonymous/ComfyUI/pull/9882/files

Once I know the timeline, I can decide whether to merge now and help with the conversion, or merge after that PR is done.

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Labels
Core — Core team dependency; Scheduled Merge — PR is reviewed and ready, but will be merged at a specific time.
Projects
None yet
Development

Successfully merging this pull request may close these issues.

3 participants